bitkeeper revision 1.1159.182.2 (419e35262uwuQFurr1RBSu_bioXF2A)
author cl349@arcadians.cl.cam.ac.uk <cl349@arcadians.cl.cam.ac.uk>
Fri, 19 Nov 2004 18:02:14 +0000 (18:02 +0000)
committer cl349@arcadians.cl.cam.ac.uk <cl349@arcadians.cl.cam.ac.uk>
Fri, 19 Nov 2004 18:02:14 +0000 (18:02 +0000)
Many files:
  Whitespace.
irq_vectors.h:
  Define TIMER_IRQ.
  Remove unused bits and cleanup whitespace.
time.c:
  Use TIMER_IRQ.
skbuff.c:
  whitespace.

12 files changed:
linux-2.6.10-rc2-xen-sparse/arch/xen/i386/kernel/time.c
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/io.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/mach-xen/irq_vectors.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/mmu_context.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/page.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/param.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pgtable-2level.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/pgtable.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/system.h
linux-2.6.10-rc2-xen-sparse/include/asm-xen/asm-i386/tlbflush.h
linux-2.6.10-rc2-xen-sparse/net/core/skbuff.c

index 81f03f407281db7e331bfb7d68ef7ce681b14db8..d4531c043fac0535b2a024f9fc3bd6a41e157381 100644 (file)
@@ -616,7 +616,7 @@ void __init hpet_time_init(void)
 #endif
 
 /* Dynamically-mapped IRQ. */
-static int time_irq;
+static int TIMER_IRQ;
 
 static struct irqaction irq_timer = {
        timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer",
@@ -646,9 +646,9 @@ void __init time_init(void)
                BUG();
        printk(KERN_INFO "Using %s for high-res timesource\n",cur_timer->name);
 
-       time_irq = bind_virq_to_irq(VIRQ_TIMER);
+       TIMER_IRQ = bind_virq_to_irq(VIRQ_TIMER);
 
-       (void)setup_irq(time_irq, &irq_timer);
+       (void)setup_irq(TIMER_IRQ, &irq_timer);
 }
 
 /* Convert jiffies to system time. Call with xtime_lock held for reading. */
index ea7e5c1494b389664c986cbab53c1afc2a49c2e7..78cea26db7b8aaea465e7600bdfd4db75f4ed211 100644 (file)
@@ -90,14 +90,17 @@ static inline void * phys_to_virt(unsigned long address)
  * Change "struct page" to physical address.
  */
 #define page_to_pseudophys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
-#define page_to_phys(page)       (phys_to_machine(page_to_pseudophys(page)))
+#define page_to_phys(page)      (phys_to_machine(page_to_pseudophys(page)))
 
-#define bio_to_pseudophys(bio) (page_to_pseudophys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
-#define bvec_to_pseudophys(bv) (page_to_pseudophys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
+#define bio_to_pseudophys(bio)  (page_to_pseudophys(bio_page((bio))) + \
+                                 (unsigned long) bio_offset((bio)))
+#define bvec_to_pseudophys(bv)  (page_to_pseudophys((bv)->bv_page) + \
+                                 (unsigned long) (bv)->bv_offset)
 
 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2)      \
        (((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) && \
-        ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == bvec_to_pseudophys((vec2))))
+        ((bvec_to_pseudophys((vec1)) + (vec1)->bv_len) == \
+         bvec_to_pseudophys((vec2))))
 
 extern void __iomem * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
 
@@ -135,7 +138,8 @@ extern void bt_iounmap(void *addr, unsigned long size);
 #define isa_virt_to_bus(_x) isa_virt_to_bus_is_UNSUPPORTED->x
 #define isa_page_to_bus(_x) isa_page_to_bus_is_UNSUPPORTED->x
 #ifdef CONFIG_XEN_PRIVILEGED_GUEST
-#define isa_bus_to_virt(_x) (void *)__fix_to_virt(FIX_ISAMAP_BEGIN - ((_x) >> PAGE_SHIFT))
+#define isa_bus_to_virt(_x) (void *)__fix_to_virt(FIX_ISAMAP_BEGIN - \
+                                                 ((_x) >> PAGE_SHIFT))
 #else
 #define isa_bus_to_virt(_x) isa_bus_to_virt_needs_PRIVILEGED_BUILD
 #endif
@@ -367,7 +371,7 @@ static inline unsigned type in##bwl##_quad(int port, int quad) { \
 static inline unsigned type in##bwl(int port) { \
        return in##bwl##_quad(port, 0); \
 }
-#else 
+#else
 #define __BUILDIO(bwl,bw,type) \
 static inline void out##bwl(unsigned type value, int port) { \
        out##bwl##_local(value, port); \
index b8384efc5355c9976abb00716148f1591f220f9e..30f4d88b62dc47f80be9ea48e3f79d7fcf82d82b 100644 (file)
@@ -65,7 +65,7 @@
 #define FIRST_DEVICE_VECTOR    0x31
 #define FIRST_SYSTEM_VECTOR    0xef
 
-/*  #define TIMER_IRQ _EVENT_TIMER */
+#define TIMER_IRQ              timer_irq
 
 /*
  * 16 8259A IRQ's, 208 potential APIC interrupt sources.
  * should be changed accordingly.
  */
 #define NR_VECTORS 256
-
-#ifdef CONFIG_PCI_USE_VECTOR
-#define NR_IRQS FIRST_SYSTEM_VECTOR
-#define NR_IRQ_VECTORS NR_IRQS
-#else
-#ifdef CONFIG_X86_IO_APIC
-#define NR_IRQS 224
-# if (224 >= 32 * NR_CPUS)
-# define NR_IRQ_VECTORS NR_IRQS
-# else
-# define NR_IRQ_VECTORS (32 * NR_CPUS)
-# endif
-#else
-#define NR_IRQS 16
-#define NR_IRQ_VECTORS NR_IRQS
-#endif
-#endif
 #endif
 
 #define FPU_IRQ                        13
  *     are bound using the provided bind/unbind functions.
  */
 
-#define PIRQ_BASE   0
-#define NR_PIRQS  128
+#define PIRQ_BASE              0
+#define NR_PIRQS               128
 
-#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
-#define NR_DYNIRQS  128
+#define DYNIRQ_BASE            (PIRQ_BASE + NR_PIRQS)
+#define NR_DYNIRQS             128
 
-#define NR_IRQS   (NR_PIRQS + NR_DYNIRQS)
-#define NR_IRQ_VECTORS NR_IRQS
+#define NR_IRQS                        (NR_PIRQS + NR_DYNIRQS)
+#define NR_IRQ_VECTORS         NR_IRQS
 
-#define pirq_to_irq(_x)   ((_x) + PIRQ_BASE)
-#define irq_to_pirq(_x)   ((_x) - PIRQ_BASE)
+#define pirq_to_irq(_x)                ((_x) + PIRQ_BASE)
+#define irq_to_pirq(_x)                ((_x) - PIRQ_BASE)
 
-#define dynirq_to_irq(_x) ((_x) + DYNIRQ_BASE)
-#define irq_to_dynirq(_x) ((_x) - DYNIRQ_BASE)
+#define dynirq_to_irq(_x)      ((_x) + DYNIRQ_BASE)
+#define irq_to_dynirq(_x)      ((_x) - DYNIRQ_BASE)
 
 #ifndef __ASSEMBLY__
 /* Dynamic binding of event channels and VIRQ sources to Linux IRQ space. */
index 159310f1bee9ffc3c92c92682ec8c645f4eab625..a815fad09dfb310b9ac34bb6446db484d6bdc70a 100644 (file)
@@ -66,10 +66,9 @@ static inline void switch_mm(struct mm_struct *prev,
 #define deactivate_mm(tsk, mm) \
        asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
 
-#define activate_mm(prev, next) \
-do { \
-       switch_mm((prev),(next),NULL); \
-       flush_page_update_queue();                      \
-} while ( 0 )
+#define activate_mm(prev, next) do {           \
+       switch_mm((prev),(next),NULL);          \
+       flush_page_update_queue();              \
+} while (0)
 
 #endif
index 32ccd0d17e3d5b33ec880f029e314accad38edda..da282bd498093fbe88772fbd341b158e7436a78b 100644 (file)
@@ -60,15 +60,15 @@ extern unsigned long *phys_to_machine_mapping;
 #define mfn_to_pfn(_mfn) (machine_to_phys_mapping[(_mfn)])
 static inline unsigned long phys_to_machine(unsigned long phys)
 {
-    unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
-    machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
-    return machine;
+       unsigned long machine = pfn_to_mfn(phys >> PAGE_SHIFT);
+       machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
+       return machine;
 }
 static inline unsigned long machine_to_phys(unsigned long machine)
 {
-    unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
-    phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
-    return phys;
+       unsigned long phys = mfn_to_pfn(machine >> PAGE_SHIFT);
+       phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
+       return phys;
 }
 
 /*
@@ -89,16 +89,8 @@ typedef struct { unsigned long pmd; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 #define boot_pte_t pte_t /* or would you rather have a typedef */
-#if 0                          /* XXXcl for MMU_UPDATE_DEBUG */
-static inline unsigned long pte_val(pte_t x)
-{
-       unsigned long ret = x.pte_low;
-       if ( (ret & 1) ) ret = machine_to_phys(ret);
-       return ret;
-}
-#else
-#define pte_val(x)     (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : (x).pte_low)
-#endif
+#define pte_val(x)     (((x).pte_low & 1) ? machine_to_phys((x).pte_low) : \
+                        (x).pte_low)
 #define pte_val_ma(x)  ((x).pte_low)
 #define HPAGE_SHIFT    22
 #endif
@@ -114,25 +106,25 @@ static inline unsigned long pte_val(pte_t x)
 
 static inline unsigned long pmd_val(pmd_t x)
 {
-    unsigned long ret = x.pmd;
-    if ( (ret) ) ret = machine_to_phys(ret);
-    return ret;
+       unsigned long ret = x.pmd;
+       if (ret) ret = machine_to_phys(ret);
+       return ret;
 }
 #define pgd_val(x)     ({ BUG(); (unsigned long)0; })
 #define pgprot_val(x)  ((x).pgprot)
 
 static inline pte_t __pte(unsigned long x)
 {
-       if ( (x & 1) ) x = phys_to_machine(x);
+       if (x & 1) x = phys_to_machine(x);
        return ((pte_t) { (x) });
 }
-#define __pte_ma(x) ((pte_t) { (x) } )
+#define __pte_ma(x)    ((pte_t) { (x) } )
 static inline pmd_t __pmd(unsigned long x)
 {
-       if ( (x & 1) ) x = phys_to_machine(x);
+       if ((x & 1)) x = phys_to_machine(x);
        return ((pmd_t) { (x) });
 }
-#define __pgd(x) ({ BUG(); (pgprot_t) { 0 }; })
+#define __pgd(x)       ({ BUG(); (pgprot_t) { 0 }; })
 #define __pgprot(x)    ((pgprot_t) { (x) } )
 
 #endif /* !__ASSEMBLY__ */
@@ -179,14 +171,6 @@ extern int sysctl_legacy_va_layout;
 
 #endif /* __ASSEMBLY__ */
 
-/* 
- * XXXcl two options for PAGE_OFFSET
- * - 0xC0000000:
- *   change text offset in arch/xen/i386/kernel/vmlinux.lds.S
- *   change __pa/__va macros
- * - 0xC0100000:
- *   change TASK_SIZE 
- */
 #ifdef __ASSEMBLY__
 #define __PAGE_OFFSET          (0xC0000000)
 #else
index 658b29ed9c583a749437a927348d44e588e1290e..5964228f06e64cd7caf87edf8c82f80744242d98 100644 (file)
@@ -2,7 +2,7 @@
 #define _ASMi386_PARAM_H
 
 #ifdef __KERNEL__
-# define HZ            100/*  0 */             /* Internal kernel timer frequency */
+# define HZ            100             /* Internal kernel timer frequency */
 # define USER_HZ       100             /* .. some user interfaces are in "ticks" */
 # define CLOCKS_PER_SEC                (USER_HZ)       /* like times() */
 #endif
index 752a069e8dfde6b01122d307c0af460db5afc1a0..e9bf5f50b5161a5805f46e7c5c54630bafd5e481 100644 (file)
@@ -17,7 +17,6 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *p
                ((unsigned long long)page_to_pfn(pte) <<
                        (unsigned long long) PAGE_SHIFT)));
        flush_page_update_queue();
-       /* XXXcl queue */
 }
 /*
  * Allocate and free page tables.
@@ -38,7 +37,7 @@ static inline void pte_free_kernel(pte_t *pte)
 
 extern void pte_free(struct page *pte);
 
-#define __pte_free_tlb(tlb,pte)        tlb_remove_page((tlb),(pte))
+#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
 
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
index 7000c5dbe88be95bce03c23d330744376179328c..36a2420a2969e100ae9e31bd827118a2817061af 100644 (file)
@@ -24,7 +24,7 @@ static inline int pgd_present(pgd_t pgd)      { return 1; }
  * hook is made available.
  */
 #define set_pte_batched(pteptr, pteval) \
-queue_l1_entry_update(pteptr, (pteval).pte_low)
+       queue_l1_entry_update(pteptr, (pteval).pte_low)
 #define set_pte(pteptr, pteval) (*(pteptr) = pteval)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 /*
@@ -60,7 +60,7 @@ static inline pte_t ptep_get_and_clear(pte_t *xp)
 }
 
 #define pte_same(a, b)         ((a).pte_low == (b).pte_low)
-/*                                 
+/*
  * We detect special mappings in one of two ways:
  *  1. If the MFN is an I/O page then Xen will set the m2p entry
  *     to be outside our maximum possible pseudophys range.
@@ -82,19 +82,18 @@ static inline pte_t ptep_get_and_clear(pte_t *xp)
  */
 #define INVALID_P2M_ENTRY (~0UL)
 #define FOREIGN_FRAME(_m) ((_m) | (1UL<<((sizeof(unsigned long)*8)-1)))
-#define pte_pfn(_pte)                                                   \
-({                                                                      \
-    unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;                   \
-    unsigned long pfn = mfn_to_pfn(mfn);                                \
-    if ( (pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn) )               \
-        pfn = max_mapnr; /* special: force !pfn_valid() */              \
-    pfn;                                                                \
+#define pte_pfn(_pte)                                                  \
+({                                                                     \
+       unsigned long mfn = (_pte).pte_low >> PAGE_SHIFT;               \
+       unsigned long pfn = mfn_to_pfn(mfn);                            \
+       if ((pfn >= max_mapnr) || (pfn_to_mfn(pfn) != mfn))             \
+               pfn = max_mapnr; /* special: force !pfn_valid() */      \
+       pfn;                                                            \
 })
 
 #define pte_page(_pte) pfn_to_page(pte_pfn(_pte))
 
 #define pte_none(x)            (!(x).pte_low)
-
 #define pfn_pte(pfn, prot)     __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pte_ma(pfn, prot)  __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pfn_pmd(pfn, prot)     __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
index 8e1be8b21bb01754f0998a415674fb8cb3a4126e..e0a37293075abd208cf40af033c222af0c7fb7f6 100644 (file)
@@ -89,7 +89,7 @@ void paging_init(void);
 # define VMALLOC_END   (FIXADDR_START-2*PAGE_SIZE)
 #endif
 
-extern void * high_memory;
+extern void *high_memory;
 extern unsigned long vmalloc_earlyreserve;
 
 /*
@@ -215,7 +215,7 @@ extern unsigned long pg0[];
    can temporarily clear it. */
 #define pmd_present(x) (pmd_val(x))
 /* pmd_clear below */
-#define        pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
+#define pmd_bad(x)     ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
 
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
@@ -421,30 +421,19 @@ extern pte_t *lookup_address(unsigned long address);
 #define update_mmu_cache(vma,address,pte) do { } while (0)
 #define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 
-#if 0
 #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
        do {                                                              \
                if (__dirty) {                                            \
-                       queue_l1_entry_update((__ptep), (__entry).pte_low); \
-                       flush_tlb_page(__vma, __address);                 \
-                       xen_flush_page_update_queue();                    \
+                       if (likely(vma->vm_mm == current->mm)) {          \
+                           xen_flush_page_update_queue();                \
+                           HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, \
+                                                        entry, UVMF_INVLPG); \
+                       } else {                                          \
+                           xen_l1_entry_update((__ptep), (__entry).pte_low); \
+                           flush_tlb_page(__vma, __address);             \
+                       }                                                 \
                }                                                         \
        } while (0)
-#else
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-       do {                                                              \
-               if (__dirty) {                                            \
-                       if ( likely(vma->vm_mm == current->mm) ) {        \
-                           xen_flush_page_update_queue();                \
-                           HYPERVISOR_update_va_mapping(address>>PAGE_SHIFT, entry, UVMF_INVLPG); \
-                       } else {                                          \
-                            xen_l1_entry_update((__ptep), (__entry).pte_low); \
-                           flush_tlb_page(__vma, __address);             \
-                       }                                                 \
-               }                                                         \
-       } while (0)
-
-#endif
 
 #define __HAVE_ARCH_PTEP_ESTABLISH
 #define ptep_establish(__vma, __address, __ptep, __entry)              \
@@ -455,7 +444,7 @@ do {                                                                        \
 #define __HAVE_ARCH_PTEP_ESTABLISH_NEW
 #define ptep_establish_new(__vma, __address, __ptep, __entry)          \
 do {                                                                   \
-       if ( likely((__vma)->vm_mm == current->mm) ) {                  \
+       if (likely((__vma)->vm_mm == current->mm)) {                    \
                xen_flush_page_update_queue();                          \
                HYPERVISOR_update_va_mapping((__address)>>PAGE_SHIFT,   \
                                             __entry, 0);               \
@@ -487,15 +476,15 @@ static inline unsigned long arbitrary_virt_to_phys(void *va)
 #define kern_addr_valid(addr)  (1)
 #endif /* !CONFIG_DISCONTIGMEM */
 
-#define io_remap_page_range(vma,from,phys,size,prot)                     \
-        direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
+#define io_remap_page_range(vma,from,phys,size,prot) \
+       direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
 
 int direct_remap_area_pages(struct mm_struct *mm,
-                            unsigned long address, 
-                            unsigned long machine_addr,
-                            unsigned long size, 
-                            pgprot_t prot,
-                            domid_t  domid);
+                           unsigned long address, 
+                           unsigned long machine_addr,
+                           unsigned long size, 
+                           pgprot_t prot,
+                           domid_t  domid);
 int __direct_remap_area_pages(struct mm_struct *mm,
                              unsigned long address, 
                              unsigned long size, 
index 10e392b990a4e48f121c9322ff0d6e5cf61ee6f5..b8eb62d37a4ea6c91e412ccedc7eb43da64f4fd6 100644 (file)
@@ -124,10 +124,10 @@ static inline unsigned long _get_base(char * addr)
 
 static inline void wbinvd(void)
 {
-    mmu_update_t u;
-    u.ptr = MMU_EXTENDED_COMMAND;
-    u.val = MMUEXT_FLUSH_CACHE;
-    (void)HYPERVISOR_mmu_update(&u, 1, NULL);
+       mmu_update_t u;
+       u.ptr = MMU_EXTENDED_COMMAND;
+       u.val = MMUEXT_FLUSH_CACHE;
+       (void)HYPERVISOR_mmu_update(&u, 1, NULL);
 }
 
 static inline unsigned long get_limit(unsigned long segment)
@@ -449,66 +449,65 @@ struct alt_instr {
  * includes these barriers, for example.
  */
 
-#define __cli()                                                               \
-do {                                                                          \
-    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
-    barrier();                                                                \
+#define __cli()                                                                \
+do {                                                                   \
+       HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;    \
+       barrier();                                                      \
 } while (0)
 
-#define __sti()                                                               \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    barrier();                                                                \
-    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
-    barrier(); /* unmask then check (avoid races) */                          \
-    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
-        force_evtchn_callback();                                              \
+#define __sti()                                                                \
+do {                                                                   \
+       shared_info_t *_shared = HYPERVISOR_shared_info;                \
+       barrier();                                                      \
+       _shared->vcpu_data[0].evtchn_upcall_mask = 0;                   \
+       barrier(); /* unmask then check (avoid races) */                \
+       if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )    \
+       force_evtchn_callback();                                        \
 } while (0)
 
-#define __save_flags(x)                                                       \
-do {                                                                          \
-    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
+#define __save_flags(x)                                                        \
+do {                                                                   \
+       (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;  \
 } while (0)
 
-#define __restore_flags(x)                                                    \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    barrier();                                                                \
-    if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {            \
-        barrier(); /* unmask then check (avoid races) */                      \
-        if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )          \
-            force_evtchn_callback();                                          \
-    }                                                                         \
+#define __restore_flags(x)                                             \
+do {                                                                   \
+       shared_info_t *_shared = HYPERVISOR_shared_info;                \
+       barrier();                                                      \
+       if ( (_shared->vcpu_data[0].evtchn_upcall_mask = (x)) == 0 ) {  \
+       barrier(); /* unmask then check (avoid races) */                \
+       if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )    \
+           force_evtchn_callback();                                    \
+       }                                                               \
 } while (0)
 
-#define safe_halt()             ((void)0)
+#define safe_halt()         ((void)0)
 
-#define __save_and_cli(x)                                                     \
-do {                                                                          \
-    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
-    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;              \
-    barrier();                                                                \
+#define __save_and_cli(x)                                              \
+do {                                                                   \
+       (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;  \
+       HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 1;    \
+       barrier();                                                      \
 } while (0)
 
-#define __save_and_sti(x)                                                     \
-do {                                                                          \
-    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
-    barrier();                                                                \
-    (x) = _shared->vcpu_data[0].evtchn_upcall_mask;                           \
-    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
-    barrier(); /* unmask then check (avoid races) */                          \
-    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
-        force_evtchn_callback();                                              \
+#define __save_and_sti(x)                                              \
+do {                                                                   \
+       shared_info_t *_shared = HYPERVISOR_shared_info;                \
+       barrier();                                                      \
+       (x) = _shared->vcpu_data[0].evtchn_upcall_mask;                 \
+       _shared->vcpu_data[0].evtchn_upcall_mask = 0;                   \
+       barrier(); /* unmask then check (avoid races) */                \
+       if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )    \
+       force_evtchn_callback();                                        \
 } while (0)
 
 #define local_irq_save(x)      __save_and_cli(x)
-#define local_irq_restore(x)    __restore_flags(x)
-#define local_save_flags(x)     __save_flags(x)
-#define local_irq_disable()     __cli()
-#define local_irq_enable()      __sti()
+#define local_irq_restore(x)   __restore_flags(x)
+#define local_save_flags(x)    __save_flags(x)
+#define local_irq_disable()    __cli()
+#define local_irq_enable()     __sti()
 
-#define irqs_disabled()                        \
-       HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
+#define irqs_disabled() HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask
 
 /*
  * disable hlt during certain critical i/o operations
index 14030f9a0493992f4888b263c6153b619ef22288..28fcf5a845c429970f50c5428d07019e82834d3e 100644 (file)
@@ -5,18 +5,13 @@
 #include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb() do {                                             \
-       xen_tlb_flush();                                                \
-} while (/*CONSTCOND*/0)
+#define __flush_tlb() xen_tlb_flush()
 
 /*
  * Global pages have to be flushed a bit differently. Not a real
  * performance problem because this does not happen often.
  */
-#define __flush_tlb_global()                                           \
-       do {                                                            \
-               xen_tlb_flush();                                        \
-       } while (0)
+#define __flush_tlb_global() xen_tlb_flush()
 
 extern unsigned long pgkern_mask;
 
@@ -30,11 +25,9 @@ extern unsigned long pgkern_mask;
 
 #define cpu_has_invlpg (boot_cpu_data.x86 > 3)
 
-#define __flush_tlb_single(addr) do {                                  \
-       xen_invlpg(addr);                                               \
-} while (/* CONSTCOND */0)
+#define __flush_tlb_single(addr) xen_invlpg(addr)
 
-# define __flush_tlb_one(addr) __flush_tlb_single(addr)
+#define __flush_tlb_one(addr) __flush_tlb_single(addr)
 
 /*
  * TLB flushing:
index 69bca069182a0ade3a7fb6a4950b8e0e44be36eb..ff3238b5dcf7c05d5e1a3221745a7fe50f359943 100644 (file)
@@ -166,7 +166,7 @@ nodata:
 /**
  *     alloc_skb_from_cache    -       allocate a network buffer
  *     @cp: kmem_cache from which to allocate the data area
- *           (object size must be big enough for @size bytes + skb overheads)
+ *          (object size must be big enough for @size bytes + skb overheads)
  *     @size: size to allocate
  *     @gfp_mask: allocation mask
  *